Simulation #1

The true 1RM score stays the same across the two measures, but the observed score varies due to biological variation, and the predicted (or, in this case, practical) score also varies due to measurement error.

# Load dependencies. library() errors immediately if a package is missing,
# unlike require(), which only returns FALSE and lets the script continue.
library(tidyverse)
library(bmbstats)
library(DT)
library(cowplot)

# Biological (between-session) variation linking true score (TS) to
# observed score (OS): OS = fixed + proportional * TS + N(0, random)
biological_fixed <- 0
biological_proportional <- 1
biological_random <- 3

# Measurement (device) error linking observed score (OS) to the
# practical/predicted score (PS): PS = fixed + proportional * OS + N(0, random)
measurement_fixed <- 0
measurement_proportional <- 1
measurement_random <- 3

# Number of simulated athletes
n_athletes <- 50

# Smallest effect size of interest; used as a +/- SESOI band (kg)
SESOI <- 5

# Bootstrap settings used throughout the analyses
random.seed <- 1667
boot_samples <- 2000
confidence <- 0.95
boot_type <- "bca"

# ==========================================
set.seed(random.seed)

# Simulation #1: no true change between trials (sd = 0, so TS2 == TS1)
data_1RM <- tibble(
  Athlete = paste0("Athlete", seq(1, n_athletes)),
  # True scores, identical across both trials
  TS1 = rnorm(n_athletes, 100, 10),
  TS2 = TS1 + rnorm(n_athletes, 0, 0),
  TSdiff = TS2 - TS1,
  # Observed scores: true score plus biological variation
  OS1 = biological_fixed + biological_proportional * TS1 + rnorm(n_athletes, 0, biological_random),
  OS2 = biological_fixed + biological_proportional * TS2 + rnorm(n_athletes, 0, biological_random),
  OSdiff = OS2 - OS1,
  # Practical/predicted scores: observed score plus measurement error
  PS1 = measurement_fixed + measurement_proportional * OS1 + rnorm(n_athletes, 0, measurement_random),
  PS2 = measurement_fixed + measurement_proportional * OS2 + rnorm(n_athletes, 0, measurement_random),
  PSdiff = PS2 - PS1
)

# Interactive table of the simulated scores, rounded to two decimals
formatRound(
  datatable(data_1RM, rownames = FALSE, filter = "top"),
  columns = 2:ncol(data_1RM),
  digits = 2
)

Analysis and plotting functions

#' Agreement estimators between two trials
#'
#' Compares `trial1` (treated as the predicted/practical score) against
#' `trial2` (treated as the observed/criterion score) and returns a named
#' vector of estimators: mean difference, SD of the differences, mean
#' absolute difference (MAD), residual standard error (RSE) of the simple
#' linear calibration model, proportion of practically equivalent residuals
#' (PPER), and the smallest detectable change (SDC).
#'
#' @param data Data frame containing the two trial columns.
#' @param trial1 Name of the predictor/practical column.
#' @param trial2 Name of the observed/criterion column.
#' @param SESOI_lower,SESOI_upper Smallest effect size of interest band.
#' @param confidence Confidence level used for the SDC (was hard-coded
#'   to 0.95; now a parameter with the same default).
#' @param na.rm Should missing values be removed before computing
#'   the difference-based estimators?
#' @return Named numeric vector of the six estimators.
my_estimators <- function(data,
                          trial1,
                          trial2,
                          SESOI_lower = 0,
                          SESOI_upper = 0,
                          confidence = 0.95,
                          na.rm = FALSE) {
  predicted <- data[[trial1]]
  observed <- data[[trial2]]

  diffs <- predicted - observed
  mean_diff <- mean(diffs, na.rm = na.rm)
  sd_diff <- sd(diffs, na.rm = na.rm)
  MAD <- mean(abs(diffs), na.rm = na.rm)

  # Simple linear calibration of observed on predicted; the residual
  # standard error is the basis for PPER and SDC
  model <- lm(observed ~ predicted)
  rse <- summary(model)$sigma
  df_resid <- length(observed) - 2

  # Proportion of practically equivalent residuals (bmbstats::PPER)
  pper <- PPER(
    sigma = rse,
    SESOI_lower = SESOI_lower,
    SESOI_upper = SESOI_upper,
    df = df_resid
  )

  # Smallest detectable change at the requested confidence level
  sdc <- rse * stats::qt(1 - ((1 - confidence) / 2), df = df_resid)

  c(
    `Mean Diff` = mean_diff,
    `SD Diff` = sd_diff,
    MAD = MAD,
    RSE = rse,
    PPER = pper,
    SDC = sdc
  )
}

# ===========================
# ===========================
#' Analyse agreement between two columns of a data frame
#'
#' Computes the point estimators via `my_estimators()`, draws a combined
#' paired-changes / calibration plot, then bootstraps confidence intervals
#' for the estimators with `bmbstats::reliability_analysis()`.
#'
#' @param data Data frame containing the columns named by `predictor`
#'   and `outcome`.
#' @param predictor,outcome Column names (strings) to compare.
#' @param sesoi Half-width of the SESOI band; defaults to the global `SESOI`.
#' @param n_boot Number of bootstrap resamples; defaults to the global
#'   `boot_samples`.
#' @param ci_type Bootstrap CI type; defaults to the global `boot_type`.
#' @param seed RNG seed for the bootstrap; defaults to the global `random.seed`.
#' @return The `reliability_analysis` result (also printed), after printing
#'   the diagnostic plot as a side effect.
analyse_func <- function(data, predictor, outcome,
                         sesoi = SESOI,
                         n_boot = boot_samples,
                         ci_type = boot_type,
                         seed = random.seed) {
  data <- data.frame(
    predictor = data[[predictor]],
    outcome = data[[outcome]]
  )

  estimators <- my_estimators(
    data = na.omit(data),
    trial1 = "predictor",
    trial2 = "outcome",
    SESOI_lower = -sesoi,
    SESOI_upper = sesoi
  )

  estimators_text <- paste0(
    names(estimators), "=", round(estimators, 2),
    collapse = ", "
  )

  # Header row with point estimates, then paired-changes and
  # calibration (Bland-Altman style) panels side by side
  gg <- plot_grid(
    ggdraw() + draw_label(estimators_text, fontface = "italic", size = 10),
    plot_grid(
      plot_pair_changes(
        group_a = data$predictor,
        group_b = data$outcome,
        group_a_label = predictor,
        group_b_label = outcome,
        SESOI_lower = -sesoi,
        SESOI_upper = sesoi
      ),
      plot_pair_lm(
        predictor = data$predictor,
        outcome = data$outcome,
        predictor_label = predictor,
        outcome_label = outcome,
        SESOI_lower = -sesoi,
        SESOI_upper = sesoi,
        na.rm = TRUE,
        control = plot_control(panel_labels = c(NULL, NULL))
      ),
      nrow = 1,
      rel_widths = c(1, 2)
    ),
    ncol = 1,
    rel_heights = c(0.1, 1)
  )
  print(gg)

  # Bootstrap confidence intervals for the same estimators
  boot_estimators <- reliability_analysis(
    data,
    trial1 = "predictor",
    trial2 = "outcome",
    SESOI_lower = function(...) -sesoi,
    SESOI_upper = function(...) sesoi,
    estimator_function = my_estimators,
    control = model_control(
      boot_type = ci_type,
      boot_samples = n_boot,
      seed = seed,
      iter = FALSE
    ),
    na.rm = TRUE
  )
  print(boot_estimators)
  boot_estimators
}

Plotting and analysis

Repeatability

# Repeatability of the true score; TS1 == TS2 here, so error estimators
# are zero and most bootstrap CIs degenerate (see output below)
TS1_TS2 <- analyse_func(data_1RM,  "TS1", "TS2")
#> [1] "All values of t are equal to  1 \n Cannot calculate confidence intervals"
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator        value lower        upper
#>  Mean Diff 0.000000e+00    NA           NA
#>    SD Diff 0.000000e+00    NA           NA
#>        MAD 0.000000e+00    NA           NA
#>        RSE 3.204942e-16     0 4.562373e-16
#>       PPER 1.000000e+00    NA           NA
#>        SDC 6.443967e-16     0 9.173266e-16

# Repeatability of the observed score (biological variation in both trials)
OS1_OS2 <- analyse_func(data_1RM,  "OS1", "OS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.1989543 -0.9518448 1.2243814
#>    SD Diff 3.9189576  3.3863430 5.0397272
#>        MAD 3.2865627  2.7698220 3.9811601
#>        RSE 3.6309941  3.2069235 4.4229834
#>       PPER 0.8251096  0.7361123 0.8744657
#>        SDC 7.3006029  6.4479519 8.8930041

# Repeatability of the practical score (biological variation plus
# measurement error in both trials)
PS1_PS2 <- analyse_func(data_1RM,  "PS1", "PS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower      upper
#>  Mean Diff -0.1440930 -2.1090574  1.5880397
#>    SD Diff  6.7266837  5.7008578  8.1745999
#>        MAD  5.4175211  4.4707010  6.6616170
#>        RSE  5.7337687  4.9491072  6.9338084
#>       PPER  0.6124651  0.5256624  0.6825777
#>        SDC 11.5285146  9.9508470 13.9413562

Validity (Prediction)

Observed vs True

# Validity of the observed vs. true score at trial 1
OS1_TS1 <- analyse_func(data_1RM,  "OS1", "TS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value     lower      upper
#>  Mean Diff -0.6486222 -1.412789 0.07847829
#>    SD Diff  2.7669426  2.368255 3.33230593
#>        MAD  2.2802496  1.822822 2.77875002
#>        RSE  2.5832908  2.216105 3.13354241
#>       PPER  0.9411704  0.882704 0.97134208
#>        SDC  5.1940542  4.455777 6.30040928

# Validity of the observed vs. true score at trial 2
OS2_TS2 <- analyse_func(data_1RM,  "OS2", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower      upper
#>  Mean Diff -0.8475765 -1.5309040 -0.1400448
#>    SD Diff  2.5523202  2.2026842  3.0327781
#>        MAD  2.1829967  1.7815176  2.6322982
#>        RSE  2.5678697  2.2188811  3.1095988
#>       PPER  0.9426184  0.8854911  0.9711095
#>        SDC  5.1630480  4.4613595  6.2522674

# Validity of the observed vs. true change score
OSdiff_TSdiff <- analyse_func(data_1RM,  "OSdiff", "TSdiff")
#> [1] "All values of t are equal to  1 \n Cannot calculate confidence intervals"
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value     lower     upper
#>  Mean Diff -0.1989543 -1.224381 0.9518448
#>    SD Diff  3.9189576  3.386343 5.0397272
#>        MAD  3.2865627  2.769822 3.9811601
#>        RSE  0.0000000        NA        NA
#>       PPER  1.0000000        NA        NA
#>        SDC  0.0000000        NA        NA

Predicted vs True

# Validity of the practical vs. true score at trial 1
PS1_TS1 <- analyse_func(data_1RM,  "PS1", "TS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.8103587 -2.0602135 0.4857175
#>    SD Diff  4.6875924  4.0168326 5.6232430
#>        MAD  3.7346413  3.0057163 4.6084908
#>        RSE  3.8726629  3.3879259 4.6394978
#>       PPER  0.7971479  0.7134523 0.8534933
#>        SDC  7.7865107  6.8118816 9.3283355

# Validity of the practical vs. true score at trial 2
PS2_TS2 <- analyse_func(data_1RM,  "PS2", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.6662657 -1.8312715 0.4321591
#>    SD Diff  4.2112919  3.6186068 5.0396329
#>        MAD  3.5473282  2.9625700 4.2314086
#>        RSE  4.1026745  3.5310305 4.9374405
#>       PPER  0.7710925  0.6837198 0.8367766
#>        SDC  8.2489800  7.0996126 9.9273895

# Validity of the practical vs. true change score
PSdiff_TSdiff <- analyse_func(data_1RM,  "PSdiff", "TSdiff")
#> [1] "All values of t are equal to  1 \n Cannot calculate confidence intervals"
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator    value     lower    upper
#>  Mean Diff 0.144093 -1.588040 2.109057
#>    SD Diff 6.726684  5.700858 8.174600
#>        MAD 5.417521  4.470701 6.661617
#>        RSE 0.000000        NA       NA
#>       PPER 1.000000        NA       NA
#>        SDC 0.000000        NA       NA

Predicted vs Observed

# Agreement of the practical vs. observed score at trial 1
PS1_OS1 <- analyse_func(data_1RM,  "PS1", "OS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.1617365 -1.0211939 0.8056034
#>    SD Diff  3.3630665  2.7931604 4.2245840
#>        MAD  2.6118866  2.1240580 3.2592150
#>        RSE  2.9798010  2.5403930 3.7393982
#>       PPER  0.9001455  0.8124409 0.9451003
#>        SDC  5.9912915  5.1078025 7.5185640

# Agreement of the practical vs. observed score at trial 2
PS2_OS2 <- analyse_func(data_1RM,  "PS2", "OS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.1813108 -0.6796237 1.0308761
#>    SD Diff 3.0902968  2.5383326 3.9167088
#>        MAD 2.3630119  1.9015453 2.9935957
#>        RSE 3.0158782  2.5267683 3.6938464
#>       PPER 0.8961385  0.8177925 0.9463232
#>        SDC 6.0638295  5.0804082 7.4269759

# Agreement of the practical vs. observed change score
PSdiff_OSdiff <- analyse_func(data_1RM,  "PSdiff", "OSdiff")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.3430474 -1.1634691 1.7208791
#>    SD Diff 5.2144206  4.4086620 6.2622832
#>        MAD 4.1600530  3.3683048 5.0470001
#>        RSE 3.0626923  2.4719221 3.9172739
#>       PPER 0.8908925  0.7920281 0.9512094
#>        SDC 6.1579557  4.9701326 7.8762071

Save estimators in data frame:

# Collect point estimates and bootstrap CIs for every Simulation #1
# comparison into one tidy data frame, converting the label columns
# to factors in the same pipeline.
experiment1 <- rbind(
  data.frame(experiment = "1", comparison = "TS1 vs. TS2", TS1_TS2$estimators),
  data.frame(experiment = "1", comparison = "OS1 vs. OS2", OS1_OS2$estimators),
  data.frame(experiment = "1", comparison = "PS1 vs. PS2", PS1_PS2$estimators),
  data.frame(experiment = "1", comparison = "OS1 vs. TS1", OS1_TS1$estimators),
  data.frame(experiment = "1", comparison = "OS2 vs. TS2", OS2_TS2$estimators),
  data.frame(experiment = "1", comparison = "OSdiff vs. TSdiff", OSdiff_TSdiff$estimators),
  data.frame(experiment = "1", comparison = "PS1 vs. TS1", PS1_TS1$estimators),
  data.frame(experiment = "1", comparison = "PS2 vs. TS2", PS2_TS2$estimators),
  data.frame(experiment = "1", comparison = "PSdiff vs. TSdiff", PSdiff_TSdiff$estimators),
  data.frame(experiment = "1", comparison = "PS1 vs. OS1", PS1_OS1$estimators),
  data.frame(experiment = "1", comparison = "PS2 vs. OS2", PS2_OS2$estimators),
  data.frame(experiment = "1", comparison = "PSdiff vs. OSdiff", PSdiff_OSdiff$estimators)
) %>%
  mutate(
    comparison = factor(comparison),
    estimator = factor(estimator)
  )

# Interactive table, rounded to two decimals
formatRound(
  datatable(experiment1, rownames = FALSE, filter = "top"),
  columns = 4:ncol(experiment1),
  digits = 2
)

Simulation #2

In this simulation there is a true change in 1RM: everyone improves by exactly 10 kg. Let’s see how that plays out.

# Simulation #2: everyone's true score improves by exactly 10 kg
# (rnorm with sd = 0 returns the mean, so TS2 = TS1 + 10 for all athletes)
set.seed(random.seed)
data_1RM <- tibble(
  Athlete = paste0("Athlete", seq(1, n_athletes)),
  TS1 = rnorm(n_athletes, 100, 10),
  TS2 = TS1 + rnorm(n_athletes, 10, 0),
  TSdiff = TS2 - TS1,
  # Observed scores: true score plus biological variation
  OS1 = biological_fixed + biological_proportional * TS1 + rnorm(n_athletes, 0, biological_random),
  OS2 = biological_fixed + biological_proportional * TS2 + rnorm(n_athletes, 0, biological_random),
  OSdiff = OS2 - OS1,
  # Practical/predicted scores: observed score plus measurement error
  PS1 = measurement_fixed + measurement_proportional * OS1 + rnorm(n_athletes, 0, measurement_random),
  PS2 = measurement_fixed + measurement_proportional * OS2 + rnorm(n_athletes, 0, measurement_random),
  PSdiff = PS2 - PS1
)

datatable(data_1RM, rownames = FALSE, filter = "top") %>%
  formatRound(columns = 2:ncol(data_1RM), digits = 2)

Plotting and analysis

Repeatability

# True-score repeatability with a constant 10 kg true change; the SD of
# the differences is zero, so bootstrap CIs degenerate (see output below)
TS1_TS2 <- analyse_func(data_1RM,  "TS1", "TS2")
#> [1] "All values of t are equal to  10 \n Cannot calculate confidence intervals"
#> [1] "All values of t are equal to  1 \n Cannot calculate confidence intervals"
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator         value lower        upper
#>  Mean Diff -1.000000e+01    NA           NA
#>    SD Diff  0.000000e+00    NA           NA
#>        MAD  1.000000e+01    NA           NA
#>        RSE  3.204942e-16     0 2.099946e-16
#>       PPER  1.000000e+00    NA           NA
#>        SDC  6.443967e-16     0 4.222225e-16

# Repeatability of the observed score (biological variation in both trials)
OS1_OS2 <- analyse_func(data_1RM,  "OS1", "OS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value       lower      upper
#>  Mean Diff -9.8010457 -10.9518448 -8.7756186
#>    SD Diff  3.9189576   3.3863430  5.0397272
#>        MAD  9.8010457   8.7756186 10.9518448
#>        RSE  3.6309941   3.2069235  4.4229834
#>       PPER  0.8251096   0.7361123  0.8744657
#>        SDC  7.3006029   6.4479519  8.8930041

# Repeatability of the practical score (biological variation plus
# measurement error in both trials)
PS1_PS2 <- analyse_func(data_1RM,  "PS1", "PS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator       value       lower      upper
#>  Mean Diff -10.1440930 -12.1090574 -8.4119603
#>    SD Diff   6.7266837   5.7008578  8.1745999
#>        MAD  10.3115951   8.6279317 12.2735568
#>        RSE   5.7337687   4.9491072  6.9338084
#>       PPER   0.6124651   0.5256624  0.6825777
#>        SDC  11.5285146   9.9508470 13.9413562

Validity (Prediction)

Observed vs True

# Validity of the observed vs. true score at trial 1
OS1_TS1 <- analyse_func(data_1RM,  "OS1", "TS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value     lower      upper
#>  Mean Diff -0.6486222 -1.412789 0.07847829
#>    SD Diff  2.7669426  2.368255 3.33230593
#>        MAD  2.2802496  1.822822 2.77875002
#>        RSE  2.5832908  2.216105 3.13354241
#>       PPER  0.9411704  0.882704 0.97134208
#>        SDC  5.1940542  4.455777 6.30040928

# Validity of the observed vs. true score at trial 2
OS2_TS2 <- analyse_func(data_1RM,  "OS2", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower      upper
#>  Mean Diff -0.8475765 -1.5309040 -0.1400448
#>    SD Diff  2.5523202  2.2026842  3.0327781
#>        MAD  2.1829967  1.7815176  2.6322982
#>        RSE  2.5678697  2.2188811  3.1095988
#>       PPER  0.9426184  0.8854911  0.9711095
#>        SDC  5.1630480  4.4613595  6.2522674

# Validity of the observed vs. true change score
OSdiff_TSdiff <- analyse_func(data_1RM,  "OSdiff", "TSdiff")
#> [1] "All values of t are equal to  1 \n Cannot calculate confidence intervals"
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator         value         lower        upper
#>  Mean Diff -1.989543e-01 -1.224381e+00 9.518448e-01
#>    SD Diff  3.918958e+00  3.386343e+00 5.039727e+00
#>        MAD  3.286563e+00  2.769822e+00 3.981160e+00
#>        RSE  1.061678e-14  9.701006e-15 1.073966e-14
#>       PPER  1.000000e+00            NA           NA
#>        SDC  2.134648e-14  1.950518e-14 2.159353e-14

Predicted vs True

# Validity of the practical vs. true score at trial 1
PS1_TS1 <- analyse_func(data_1RM,  "PS1", "TS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.8103587 -2.0602135 0.4857175
#>    SD Diff  4.6875924  4.0168326 5.6232430
#>        MAD  3.7346413  3.0057163 4.6084908
#>        RSE  3.8726629  3.3879259 4.6394978
#>       PPER  0.7971479  0.7134523 0.8534933
#>        SDC  7.7865107  6.8118816 9.3283355

# Validity of the practical vs. true score at trial 2
PS2_TS2 <- analyse_func(data_1RM,  "PS2", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.6662657 -1.8312715 0.4321591
#>    SD Diff  4.2112919  3.6186068 5.0396329
#>        MAD  3.5473282  2.9625700 4.2314086
#>        RSE  4.1026745  3.5310305 4.9374405
#>       PPER  0.7710925  0.6837198 0.8367766
#>        SDC  8.2489800  7.0996126 9.9273895

# Validity of the practical vs. true change score
PSdiff_TSdiff <- analyse_func(data_1RM,  "PSdiff", "TSdiff")
#> [1] "All values of t are equal to  1 \n Cannot calculate confidence intervals"
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator        value         lower        upper
#>  Mean Diff 1.440930e-01 -1.588040e+00 2.109057e+00
#>    SD Diff 6.726684e+00  5.700858e+00 8.174600e+00
#>        MAD 5.417521e+00  4.470701e+00 6.661617e+00
#>        RSE 1.073058e-14  1.030554e-14 1.076850e-14
#>       PPER 1.000000e+00            NA           NA
#>        SDC 2.157529e-14  2.072067e-14 2.165152e-14

Predicted vs Observed

# Agreement of the practical vs. observed score at trial 1
PS1_OS1 <- analyse_func(data_1RM,  "PS1", "OS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.1617365 -1.0211939 0.8056034
#>    SD Diff  3.3630665  2.7931604 4.2245840
#>        MAD  2.6118866  2.1240580 3.2592150
#>        RSE  2.9798010  2.5403930 3.7393982
#>       PPER  0.9001455  0.8124409 0.9451003
#>        SDC  5.9912915  5.1078025 7.5185640

# Agreement of the practical vs. observed score at trial 2
PS2_OS2 <- analyse_func(data_1RM,  "PS2", "OS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.1813108 -0.6796237 1.0308761
#>    SD Diff 3.0902968  2.5383326 3.9167088
#>        MAD 2.3630119  1.9015453 2.9935957
#>        RSE 3.0158782  2.5267683 3.6938464
#>       PPER 0.8961385  0.8177925 0.9463232
#>        SDC 6.0638295  5.0804082 7.4269759

# Agreement of the practical vs. observed change score
PSdiff_OSdiff <- analyse_func(data_1RM,  "PSdiff", "OSdiff")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.3430474 -1.1634691 1.7208791
#>    SD Diff 5.2144206  4.4086620 6.2622832
#>        MAD 4.1600530  3.3683048 5.0470001
#>        RSE 3.0626923  2.4719221 3.9172739
#>       PPER 0.8908925  0.7920281 0.9512094
#>        SDC 6.1579557  4.9701326 7.8762071

Save estimators in data frame:

# Collect point estimates and bootstrap CIs for every Simulation #2
# comparison into one tidy data frame, converting the label columns
# to factors in the same pipeline.
experiment2 <- rbind(
  data.frame(experiment = "2", comparison = "TS1 vs. TS2", TS1_TS2$estimators),
  data.frame(experiment = "2", comparison = "OS1 vs. OS2", OS1_OS2$estimators),
  data.frame(experiment = "2", comparison = "PS1 vs. PS2", PS1_PS2$estimators),
  data.frame(experiment = "2", comparison = "OS1 vs. TS1", OS1_TS1$estimators),
  data.frame(experiment = "2", comparison = "OS2 vs. TS2", OS2_TS2$estimators),
  data.frame(experiment = "2", comparison = "OSdiff vs. TSdiff", OSdiff_TSdiff$estimators),
  data.frame(experiment = "2", comparison = "PS1 vs. TS1", PS1_TS1$estimators),
  data.frame(experiment = "2", comparison = "PS2 vs. TS2", PS2_TS2$estimators),
  data.frame(experiment = "2", comparison = "PSdiff vs. TSdiff", PSdiff_TSdiff$estimators),
  data.frame(experiment = "2", comparison = "PS1 vs. OS1", PS1_OS1$estimators),
  data.frame(experiment = "2", comparison = "PS2 vs. OS2", PS2_OS2$estimators),
  data.frame(experiment = "2", comparison = "PSdiff vs. OSdiff", PSdiff_OSdiff$estimators)
) %>%
  mutate(
    comparison = factor(comparison),
    estimator = factor(estimator)
  )

# Interactive table, rounded to two decimals
formatRound(
  datatable(experiment2, rownames = FALSE, filter = "top"),
  columns = 4:ncol(experiment2),
  digits = 2
)

Simulation #3

In this simulation there is a true change in 1RM: everyone improves, but this time the improvement varies across athletes (mean = 10, sd = 5). Let’s see how that plays out.

# Simulation #3: everyone's true score improves, but by a variable amount
# drawn from N(mean = 10, sd = 5)
set.seed(random.seed)

data_1RM <- tibble(
  Athlete = paste0("Athlete", seq(1, n_athletes)),
  TS1 = rnorm(n_athletes, 100, 10),
  TS2 = TS1 + rnorm(n_athletes, 10, 5),
  TSdiff = TS2 - TS1,
  # Observed scores: true score plus biological variation
  OS1 = biological_fixed + biological_proportional * TS1 + rnorm(n_athletes, 0, biological_random),
  OS2 = biological_fixed + biological_proportional * TS2 + rnorm(n_athletes, 0, biological_random),
  OSdiff = OS2 - OS1,
  # Practical/predicted scores: observed score plus measurement error
  PS1 = measurement_fixed + measurement_proportional * OS1 + rnorm(n_athletes, 0, measurement_random),
  PS2 = measurement_fixed + measurement_proportional * OS2 + rnorm(n_athletes, 0, measurement_random),
  PSdiff = PS2 - PS1
)

datatable(data_1RM, rownames = FALSE, filter = "top") %>%
  formatRound(columns = 2:ncol(data_1RM), digits = 2)

Plotting and analysis

Repeatability

# True-score repeatability with variable true change (mean 10, sd 5)
TS1_TS2 <- analyse_func(data_1RM,  "TS1", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value       lower      upper
#>  Mean Diff -8.9189630 -10.1307971 -7.6453514
#>    SD Diff  4.6115710   3.9470916  5.5538432
#>        MAD  8.9578068   7.7298742 10.1662669
#>        RSE  4.6312950   3.9821153  5.6201853
#>       PPER  0.7142897   0.6221414  0.7848343
#>        SDC  9.3118426   8.0065795 11.3001399

# Repeatability of the observed score (biological variation in both trials)
OS1_OS2 <- analyse_func(data_1RM,  "OS1", "OS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value       lower      upper
#>  Mean Diff -9.6048030 -11.3823253 -7.7025302
#>    SD Diff  6.7343681   5.7274530  8.2999585
#>        MAD 10.1574998   8.4501656 11.6874232
#>        RSE  6.7602780   5.8218479  8.3050045
#>       PPER  0.5368641   0.4501079  0.6053081
#>        SDC 13.5924499  11.7056096 16.6983306

# Repeatability of the practical score (biological variation plus
# measurement error in both trials)
PS1_PS2 <- analyse_func(data_1RM,  "PS1", "PS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower      upper
#>  Mean Diff -9.6707868 -11.882981 -7.4360246
#>    SD Diff  7.9074379   6.782576  9.5692373
#>        MAD 10.5196874   8.620761 12.3702870
#>        RSE  7.9764946   6.888788  9.7321362
#>       PPER  0.4662668   0.390257  0.5285653
#>        SDC 16.0378172  13.850836 19.5677714

Validity (Prediction)

Observed vs True

# Validity of the observed vs. true score at trial 1
OS1_TS1 <- analyse_func(data_1RM,  "OS1", "TS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower      upper
#>  Mean Diff -0.8475765 -1.5309040 -0.1400448
#>    SD Diff  2.5523202  2.2026842  3.0327781
#>        MAD  2.1829967  1.7815176  2.6322982
#>        RSE  2.5678697  2.2188811  3.1095988
#>       PPER  0.9426184  0.8854911  0.9711095
#>        SDC  5.1630480  4.4613595  6.2522674

# Validity of the observed vs. true score at trial 2
OS2_TS2 <- analyse_func(data_1RM,  "OS2", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.1617365 -1.0211939 0.8056034
#>    SD Diff  3.3630665  2.7931604 4.2245840
#>        MAD  2.6118866  2.1240580 3.2592150
#>        RSE  2.9964181  2.5632669 3.7849629
#>       PPER  0.8983040  0.8068333 0.9429202
#>        SDC  6.0247023  5.1537935 7.6101780

# Validity of the observed vs. true change score
OSdiff_TSdiff <- analyse_func(data_1RM,  "OSdiff", "TSdiff")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.6858400 -0.4322269 1.8146443
#>    SD Diff 4.1343727  3.5793771 4.9704161
#>        MAD 3.4791955  2.9056326 4.1869027
#>        RSE 2.8119809  2.4368833 3.5244330
#>       PPER 0.9182789  0.8369433 0.9542845
#>        SDC 5.6538666  4.8996823 7.0863475

Predicted vs True

# Validity of the practical vs. true score at trial 1
PS1_TS1 <- analyse_func(data_1RM,  "PS1", "TS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff -0.6662657 -1.8312715 0.4321591
#>    SD Diff  4.2112919  3.6186068 5.0396329
#>        MAD  3.5473282  2.9625700 4.2314086
#>        RSE  4.1026745  3.5310305 4.9374405
#>       PPER  0.7710925  0.6837198 0.8367766
#>        SDC  8.2489800  7.0996126 9.9273895

# Validity of the practical vs. true score at trial 2
PS2_TS2 <- analyse_func(data_1RM,  "PS2", "TS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value     lower      upper
#>  Mean Diff 0.0855581 -1.196799  1.4492028
#>    SD Diff 4.8396908  4.057859  5.8318617
#>        MAD 3.8692842  3.141164  4.7630852
#>        RSE 4.4014128  3.739475  5.4671137
#>       PPER 0.7384028  0.635042  0.8125517
#>        SDC 8.8496336  7.518719 10.9923689

# Validity of the practical vs. true change score
PSdiff_TSdiff <- analyse_func(data_1RM,  "PSdiff", "TSdiff")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.7518238 -0.9275746 2.4979527
#>    SD Diff 6.2996217  5.3560794 7.4951405
#>        MAD 5.1529431  4.2207781 6.1842849
#>        RSE 3.7106093  3.1525026 4.7508912
#>       PPER 0.8158504  0.7021780 0.8807171
#>        SDC 7.4606799  6.3385314 9.5523069

Predicted vs Observed

# Agreement of the practical vs. observed score at trial 1
PS1_OS1 <- analyse_func(data_1RM,  "PS1", "OS1")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower     upper
#>  Mean Diff 0.1813108 -0.6796237 1.0308761
#>    SD Diff 3.0902968  2.5383326 3.9167088
#>        MAD 2.3630119  1.9015453 2.9935957
#>        RSE 3.0158782  2.5267683 3.6938464
#>       PPER 0.8961385  0.8177925 0.9463232
#>        SDC 6.0638295  5.0804082 7.4269759

# Agreement of the practical vs. observed score at trial 2
PS2_OS2 <- analyse_func(data_1RM,  "PS2", "OS2")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator     value      lower    upper
#>  Mean Diff 0.2472946 -0.7795265 1.134673
#>    SD Diff 3.4881795  2.9257176 4.208746
#>        MAD 2.7469691  2.1998625 3.371836
#>        RSE 3.4838795  2.9903921 4.302615
#>       PPER 0.8422803  0.7490526 0.898956
#>        SDC 7.0048091  6.0125863 8.650987

# Agreement of the practical vs. observed change score
PSdiff_OSdiff <- analyse_func(data_1RM,  "PSdiff", "OSdiff")
#> Bootstrap with 2000 resamples and 95% bca confidence intervals.
#> 
#>  estimator      value      lower     upper
#>  Mean Diff 0.06598376 -1.0052804 1.2527324
#>    SD Diff 4.16947276  3.4743992 5.2334076
#>        MAD 3.39029586  2.7938717 4.0918165
#>        RSE 3.58770417  2.9635487 4.5261121
#>       PPER 0.83015687  0.7251217 0.9019114
#>        SDC 7.21356271  5.9586141 9.1003583

Save estimators in data frame:

# Collect point estimates and bootstrap CIs for every Simulation #3
# comparison into one tidy data frame, converting the label columns
# to factors in the same pipeline.
experiment3 <- rbind(
  data.frame(experiment = "3", comparison = "TS1 vs. TS2", TS1_TS2$estimators),
  data.frame(experiment = "3", comparison = "OS1 vs. OS2", OS1_OS2$estimators),
  data.frame(experiment = "3", comparison = "PS1 vs. PS2", PS1_PS2$estimators),
  data.frame(experiment = "3", comparison = "OS1 vs. TS1", OS1_TS1$estimators),
  data.frame(experiment = "3", comparison = "OS2 vs. TS2", OS2_TS2$estimators),
  data.frame(experiment = "3", comparison = "OSdiff vs. TSdiff", OSdiff_TSdiff$estimators),
  data.frame(experiment = "3", comparison = "PS1 vs. TS1", PS1_TS1$estimators),
  data.frame(experiment = "3", comparison = "PS2 vs. TS2", PS2_TS2$estimators),
  data.frame(experiment = "3", comparison = "PSdiff vs. TSdiff", PSdiff_TSdiff$estimators),
  data.frame(experiment = "3", comparison = "PS1 vs. OS1", PS1_OS1$estimators),
  data.frame(experiment = "3", comparison = "PS2 vs. OS2", PS2_OS2$estimators),
  data.frame(experiment = "3", comparison = "PSdiff vs. OSdiff", PSdiff_OSdiff$estimators)
) %>%
  mutate(
    comparison = factor(comparison),
    estimator = factor(estimator)
  )

# Interactive table, rounded to two decimals
formatRound(
  datatable(experiment3, rownames = FALSE, filter = "top"),
  columns = 4:ncol(experiment3),
  digits = 2
)

Combined

# Stack the three simulations and re-create the factor columns over the
# combined set of levels.
experiment_data <- rbind(
  experiment1,
  experiment2,
  experiment3
) %>%
  mutate(
    experiment = factor(experiment),
    comparison = factor(comparison),
    estimator = factor(estimator)
  )

# Interactive table, rounded to two decimals
formatRound(
  datatable(experiment_data, rownames = FALSE, filter = "top"),
  columns = 4:ncol(experiment_data),
  digits = 2
)